--- title: CNN Interpreter keywords: fastai sidebar: home_sidebar summary: "Wrapper around several model interpretability techniques " description: "Wrapper around several model interpretability techniques " ---
{% raw %}
{% endraw %} {% raw %}
{% endraw %} {% raw %}
from fast_impl.visualize import *
{% endraw %} {% raw %}
# Download (if not cached) and extract the 320px ImageWoof dataset;
# `path` points at the extracted dataset root.
path = untar_data(URLs.IMAGEWOOF_320)

# Map ImageWoof WordNet synset ids to human-readable breed names;
# used below as the label-decoding step in the DataBlock's `get_y` pipeline.
lbl_dict = {
    'n02086240': 'Shih-Tzu',
    'n02087394': 'Rhodesian ridgeback',
    'n02088364': 'Beagle',
    'n02089973': 'English foxhound',
    'n02093754': 'Australian terrier',
    'n02096294': 'Border terrier',
    'n02099601': 'Golden retriever',
    'n02105641': 'Old English sheepdog',
    'n02111889': 'Samoyed',
    'n02115641': 'Dingo',
}

# DataBlock: images labelled by their parent directory name, decoded to
# readable breed names via lbl_dict; train/valid split comes from the
# grandparent folder ('val' marks the validation set).
dblock = DataBlock(blocks=(ImageBlock,CategoryBlock),
                   get_items=get_image_files,
                   splitter=GrandparentSplitter(valid_name='val'),
                   get_y=Pipeline([parent_label,lbl_dict.__getitem__]),  # folder name -> breed name
                   item_tfms=Resize(320),  # per-item resize before batching
                   batch_tfms=[*aug_transforms(size=224),Normalize.from_stats(*imagenet_stats)])  # batch augs + ImageNet normalization
dls = dblock.dataloaders(path,bs=32)
{% endraw %}

You can download the model weights here. It's a resnet34, trained for 10 epochs, reaching around 96% accuracy.

CAM model (GAP+Linear) weights are available here (94% accuracy).

1. Class Activation Mapping (CAM)

{% raw %}
def get_cam_resnet(arch,num_classes,pretrained=True):
  """Build a CAM-friendly classifier: a resnet body followed by a
  GAP -> Flatten -> Linear head.

  With this head shape, the final Linear layer's weights can be projected
  onto the body's last feature maps to produce Class Activation Maps.

  arch: resnet constructor (e.g. `resnet34`)
  num_classes: number of output classes
  pretrained: initialise the body with pretrained weights when True

  Returns an `nn.Sequential(body, head)` model.
  """
  body = create_body(arch,cut=-2,pretrained=pretrained)  # drop the original avgpool+fc
  nf = num_features_model(body)  # channel count of the body's final feature maps
  head = nn.Sequential(nn.AdaptiveAvgPool2d((1,1)),Flatten(),nn.Linear(nf,num_classes))
  return nn.Sequential(body,head)
{% endraw %} {% raw %}
# Build the GAP+Linear CAM model (randomly initialised) and load the
# previously trained weights from model_dir.
model = get_cam_resnet(resnet34,dls.c,pretrained=False)
learn = Learner(dls,model,model_dir='/content/models',opt_func=ranger,metrics=error_rate)
learn.load('cam-resnet34')  # loads the saved 'cam-resnet34' checkpoint from model_dir
<fastai2.learner.Learner at 0x7f6a4f2501d0>
{% endraw %} {% raw %}
{% endraw %} {% raw %}

generate_cam[source]

generate_cam(model, xb, act_path:list=[0], wt_path:list=[1, -1], with_preds=False)

Show the CAM for a given image. `act_path`: list of indices to reach the activation-maps layer. `wt_path`: list of indices to reach the weight layer.

{% endraw %} {% raw %}
xb,yb = dls.one_batch()
dls.show_batch((xb,yb))
{% endraw %} {% raw %}
cam,y_preds = generate_cam(learn.model,xb,with_preds=True)
{% endraw %} {% raw %}
{% endraw %} {% raw %}

class CamImage[source]

CamImage(x_dec, y_dec, pred_cls, cam:Tensor)

{% endraw %} {% raw %}
{% endraw %} {% raw %}

get_at[source]

get_at(batch_dec, cam_batch, y_preds, vocab, idx, for_cls=None)

{% endraw %} {% raw %}

show_at[source]

show_at(model, dl, xb, yb, idx, merge=True, for_cls=None, gen='generate_cam')

{% endraw %} {% raw %}
show_at(learn.model,dls.valid,xb,yb,8)
{% endraw %} {% raw %}
show_at(learn.model,dls.valid,xb,yb,8,merge=False)
{% endraw %} {% raw %}
show_at(learn.model,dls.valid,xb,yb,8,for_cls=3)
{% endraw %} {% raw %}
{% endraw %} {% raw %}

show_cam_batch[source]

show_cam_batch(xb, yb, cam_batch, y_preds, max_n=9, merge=True, nrows=None, ncols=None, figsize=None)

{% endraw %} {% raw %}
xb,yb = dls.one_batch()
{% endraw %} {% raw %}
cam_batch,y_preds = generate_cam(learn.model,xb,with_preds=True)
show_cam_batch(xb,yb,cam_batch,y_preds)
{% endraw %} {% raw %}
{% endraw %} {% raw %}

class BaseInterpreter[source]

BaseInterpreter(model:Module, valid_dl:DataLoader=None)

{% endraw %} {% raw %}
{% endraw %} {% raw %}

batch_none[source]

batch_none(xb, yb)

{% endraw %} {% raw %}

class CamInterpreter[source]

CamInterpreter(model:Module, valid_dl:DataLoader=None) :: BaseInterpreter

{% endraw %} {% raw %}
interp = CamInterpreter.from_learner(learn)
{% endraw %} {% raw %}
cam_b,y_preds = interp.generate(with_preds=True)
{% endraw %} {% raw %}
interp.show_batch()
{% endraw %}

2. Grad-CAM

{% raw %}
{% endraw %} {% raw %}

generate_gradcam[source]

generate_gradcam(model, x, y=None, act_path:list=[0], with_preds=False)

Show the Grad-CAM for a given image. `x`, `y`: input image and target class. `act_path`: list of indices to reach the target activation layer.

{% endraw %} {% raw %}
# Standard fastai cnn_learner (resnet34) for the Grad-CAM demos;
# weights come from a saved checkpoint rather than pretraining.
learn = cnn_learner(dls,resnet34,pretrained=False)
learn.load('resnet34')  # loads the saved 'resnet34' checkpoint
<fastai2.learner.Learner at 0x7f0d93d3e630>
{% endraw %} {% raw %}
m = learn.model.eval()  # eval mode: fixes batchnorm/dropout behaviour for interpretation
{% endraw %} {% raw %}
# Enable shuffling on the validation loader so repeated runs show varied images.
# (The original cell set `dls.valid.shuffle = True` twice; the duplicate
# assignment was a no-op and has been removed.)
dls.valid.shuffle = True
xb,yb = dls.one_batch()
dls.show_batch((xb,yb))
{% endraw %} {% raw %}
idx=7  # pick one image from the batch
# Grad-CAM for a single (image, label) pair, also returning the model's predictions.
gcam,preds = generate_gradcam(m,xb[idx],yb[idx],with_preds=True)
x_dec,y_dec = dls.decode_batch((xb[idx][None],yb[idx][None]))[0]  # decode to displayable image/label
pred_cls = dls.vocab[preds.argmax().item()]  # predicted class name
cam_img = CamImage(x_dec,y_dec,pred_cls,gcam[0])
cam_img.show()
{% endraw %} {% raw %}
# Grad-CAM for a chosen class (not necessarily the predicted one):
# passing the class index as the target makes the map answer
# "what would make the model say this class?".
for_cls = 1
lbl = dls.vocab[for_cls]
print(f"For Class: {lbl}")
cust_gcam = generate_gradcam(m,xb[idx],tensor(for_cls))  # force the target class
cam_img2 = cam_img.new(lbl,cust_gcam)  # reuse the decoded image with the new map
cam_img2.show()
For Class: Beagle
{% endraw %} {% raw %}
# Build Grad-CAMs for the whole batch one image at a time
# (generate_gradcam here operates on a single (x, y) pair).
gcams = []
y_preds = []
for x,y in zip(xb,yb):
  gcam,preds = generate_gradcam(m,x,y,with_preds=True)
  gcams.append(gcam[0])
  y_preds.append(preds[0])
{% endraw %} {% raw %}
gcam_batch,y_preds = torch.stack(gcams),torch.stack(y_preds)
{% endraw %} {% raw %}
{% endraw %} {% raw %}

get_gcam_at[source]

get_gcam_at(batch_dec, cam_batch, y_preds, vocab, idx)

{% endraw %} {% raw %}
{% endraw %} {% raw %}

show_gradcam_batch[source]

show_gradcam_batch(xb, yb, cam_batch, y_preds, vocab, max_n=9, cmap='magma', merge=True, guided=False, nrows=None, ncols=None, figsize=None)

{% endraw %} {% raw %}
show_gradcam_batch(xb,yb,gcam_batch,y_preds,dls.vocab)
{% endraw %} {% raw %}
{% endraw %} {% raw %}

class GradCamInterpreter[source]

GradCamInterpreter(model:Module, valid_dl:DataLoader=None) :: BaseInterpreter

{% endraw %} {% raw %}
interp = GradCamInterpreter.from_learner(learn)
{% endraw %} {% raw %}
interp.show_batch()
{% endraw %} {% raw %}
interp.show_batch(guided=True,cmap='Greys')
{% endraw %} {% raw %}
interp.show_at(0,guided=True,cmap='Greys')
{% endraw %}